Maclife 157 / MACLIFE157-2001-09.ISO.7z / MACLIFE157-2001-09.ISO / Linux / MacOS Tools / Other / BootX 1.1.3 (for Old Mac OS) / Sources / src / bootstrap.c
/* This is the BootX bootstrap. This code is called by BootX in supervisor mode
* with whatever mappings have been set by MacOS.
*
* The unmangler will first move itself up in memory in case its current location
* overlaps the destination of the kernel. The bootstrap copies itself
* and jumps to the new bootstrap code.
*
* Note that all "decisions" regarding this copy are made by BootX; the stack
* pointer (r1) is already set to the destination stack for the kernel
* (the bootstrap has no stack).
*
* Then, it will copy kernel pages to their destination from a map passed in
* by BootX. This map is appended to the unmangler and a pointer to it is
* passed in r9 (pointer to the destination location of the unmangler).
* This map is actually a kind of "script" with a series of copy operations
* to perform (a list of src/dest/size entries).
*
* Registers on entry:
*
* r2 = 1 on entry by BootX
* r3 = 0x426f6f58 ('BooX')
* r4 = pointer to boot_infos (in kernel destination)
* r5 = fb address (for BAT mapping)
* r6 = physical address of me
* r7 = physical address where I must be copied.
* r8 = size to copy (size of unmangler)
* r9 = unmangle map (pointer to list of copy operations)
* r10 = kernel entry in destination
* r11 = 1 indicates need to setup a BAT mapping
*
* The unmangle map is a list of tuples of 3 longs each: a source address,
* a destination address, and a size. A size of zero indicates the end of
* the list.
*
* Portions of this code are from Gabriel Paubert's PReP bootloader; other
* portions are from the Linux kernel itself (arch/ppc/kernel/head.S).
*
*/
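/* For illustration, a hypothetical C view of one unmangle map entry
* (BootX lays the map out as raw 32-bit words; this struct is only a
* sketch of that layout, matching the loads in unmangle_loop below):
*/
struct unmangle_entry {
	unsigned long src;	/* physical source address */
	unsigned long dest;	/* physical destination address */
	unsigned long size;	/* bytes to copy; 0 terminates the list */
};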
#include "processor.h"
#ifndef BROKEN_THIRD_PARTY_CARDS
#define BROKEN_THIRD_PARTY_CARDS 1
#endif
#if BROKEN_THIRD_PARTY_CARDS
#define __sync(x) \
	li r0,0; \
	cmpwi r0,0; \
	bne+ toto##x; \
	sync; \
	toto##x:
#else
#define __sync(x) sync
#endif
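/* Mechanically, __sync(x) always executes the sync: r0 is zeroed, the
* compare sets CR0 to "equal", and the bne+ falls through. Presumably the
* surrounding instructions are what keeps the broken cards happy.
*/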
asm void start(void)
{
first_bootstrap:
	/* Disable interrupts (clear MSR_EE). rlwinm would be "smarter"
	   but less readable (our constants are bit masks). */
	mfmsr	r0
	ori	r0,r0,MSR_EE
	xori	r0,r0,MSR_EE
	mtmsr	r0
	__sync(0)

	/* Check if the MMU was already turned off */
	cmplwi	r2,0
	beq	already_off

	/* turn the MMU off */
	mfmsr	r0
	ori	r0,r0,MSR_IR|MSR_DR	/* |MSR_IP */
	li	r2,0
	xori	r0,r0,MSR_IR|MSR_DR	/* |MSR_IP */
	mtsrr0	r6
	mtsrr1	r0
	__sync(1)	/* I heard that some 601s like this one */
	rfi
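	/* rfi reloads the program counter from SRR0 and the MSR from
	 * SRR1, so execution resumes at our physical address (r6) with
	 * both instruction and data translation disabled.
	 */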
already_off:
	/* save some parameters */
	mr	r31,r3
	mr	r30,r4
	mr	r29,r5

	/* flush TLBs, invalidate BATs */
	bl	flush_tlb
	bl	inval_BATs

	/* Do we need to copy ourselves? */
	cmpw	r6,r7
	beq	skip_copy

	/* Copy ourselves */
	mr	r3,r7
	mr	r4,r6
	mr	r5,r8
	li	r6,0	/* start offset for copy_and_flush */
	bl	copy_and_flush
	mr	r6,r7	/* update address of me */
	mr	r3,r31	/* restore parameters */
	mr	r4,r30
	mr	r5,r29
	li	r2,0	/* not necessary since no code here changes it, but... */
	mtlr	r7	/* Jump to the copy of me (*) */
	blr

	/* (*) note that we jump to the beginning of the copy. We could jump
	   to skip_copy directly if CW knew how to calculate the offset
	   directly in the inline assembler. This will be done in the next
	   version of the compiler; in the meantime, we just re-enter at
	   the beginning.
	 */
skip_copy:
	/* Call unmangle code */
	bl	unmangle_kernel

	/* Jump to kernel */
	mr	r3,r31	/* restore parameters */
	mr	r4,r30
	li	r5,0	/* clear r5 */
	mtlr	r10
	blr
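	/* The kernel is entered with r3 = 'BooX' and r4 = pointer to
	 * boot_infos, preserved from entry (see the register list at
	 * the top of this file).
	 */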
/* Unmangle kernel code. This routine will read the array prepared by
* BootX and follow the instructions in it for copying kernel pages until
* the kernel is fully reconstituted.
*/
unmangle_kernel:
	mflr	r13
unmangle_loop:
	lwz	r4,0(r9)	/* source address */
	lwz	r3,4(r9)	/* destination address */
	lwz	r5,8(r9)	/* size in bytes (0 terminates the map) */
	li	r6,0		/* start offset for copy_and_flush */
	addi	r9,r9,12	/* advance to the next map entry */
	cmpwi	r5,0
	beq	unmangle_finish
	bl	copy_and_flush
	b	unmangle_loop
unmangle_finish:
	mtlr	r13
	blr
/*
* Copy routine used to copy a piece of code and flush and invalidate
* the caches as needed.
* r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
* on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
*/
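/* Note: the inner loop copies one 32-byte cache line (8 words) per
* iteration; dcbst then pushes the line out to memory and icbi discards
* any stale copy from the instruction cache, so the copied code can be
* executed safely.
*/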
copy_and_flush:
	addi	r5,r5,-4
	addi	r6,r6,-4
@4:
	li	r0,8
	mtctr	r0
@3:
	addi	r6,r6,4	/* copy a cache line */
	lwzx	r0,r6,r4
	stwx	r0,r6,r3
	bdnz	@3
	dcbst	r6,r3	/* write it to memory */
	__sync(3)
	icbi	r6,r3	/* flush the icache line */
	cmplw	r6,r5
	blt	@4
	isync
	addi	r5,r5,4
	addi	r6,r6,4
	blr
/* Routine for invalidating all BATs */
inval_BATs:
	li	r3,0
	mfspr	r28,PVR
	rlwinm	r28,r28,16,16,31	/* r28 = 1 for 601, 4 for 604 */
	cmpwi	r28,1
	beq	is_601_2
	mtdbatu	r3,0
	mtdbatu	r3,1
	mtdbatu	r3,2
	mtdbatu	r3,3
is_601_2:
	mtibatu	r3,0
	mtibatl	r3,0
	mtibatu	r3,1
	mtibatl	r3,1
	mtibatu	r3,2
	mtibatl	r3,2
	mtibatu	r3,3
	mtibatl	r3,3
	blr
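/* Note: the 601 has only four unified BATs, addressed through the IBAT
* register numbers, which is why the DBAT writes are skipped on it.
* Clearing just the upper halves is enough to invalidate the DBATs,
* since the Vs/Vp valid bits live in the upper BAT register.
*/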
/* Due to the PPC architecture (and according to the specifications), a
* series of tlbie instructions which goes through a whole 256 MB segment
* always flushes the whole TLB. This is obviously overkill and slow, but
* who cares? It takes about 1 ms on a 200 MHz 603e and works even if
* residual data get the number of TLB entries wrong.
*/
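/* The loop below starts r28 at 0x10000000 (256 MB) and steps it down one
* 4 KB page per iteration, issuing a tlbie for every page in the segment.
*/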
flush_tlb:
	lis	r28,0x1000
flush_tlb_loop:
	addic.	r28,r28,-0x1000
	tlbie	r28
	bgt	flush_tlb_loop	/* loop while r28 > 0 */

	/* tlbsync is not implemented on the 601, so use sync, which seems
	 * to be a superset of tlbsync in all cases, and do not bother with
	 * CPU-dependent code.
	 */
	__sync(4)
	blr
}